In this report, we reproduce the analyses using data from fMRI study 1 reported in Supplementary Material.

prep data

First, we load the relevant packages, define functions and plotting aesthetics, and load and tidy the data.

load packages

# Load (and install, if missing) all required CRAN packages via pacman
library(pacman)
pacman::p_load(tidyverse, purrr, fs, knitr, lmerTest, ggeffects, kableExtra, boot, devtools, install = TRUE)
# emo is GitHub-only; NOTE(review): this installs at render time on every knit
devtools::install_github("hadley/emo")

define functions

source("https://gist.githubusercontent.com/benmarwick/2a1bb0133ff568cbe28d/raw/fb53bd97121f7f9ce947837ef1a4c65a73bffb3f/geom_flat_violin.R")

# MLM results table function
# Formats the fixed effects of a fitted lmerTest model as a tidy table with
# columns: term, b [95% CI], df, t, p (estimates rounded to 2 dp, APA-style p)
table_model = function(model_data) {
  model_data %>%
  broom.mixed::tidy(conf.int = TRUE) %>%
  filter(effect == "fixed") %>%
  rename("SE" = std.error,
         "t" = statistic,
         "p" = p.value) %>%
  select(-group, -effect) %>%
  # round every numeric column except the term label and the p value
  # (across() supersedes the deprecated mutate_at())
  mutate(across(-c(contains("term"), contains("p")), ~ round(.x, 2))) %>%
  mutate(term = gsub("cond", "", term),
         term = gsub("\\(Intercept\\)", "intercept", term),
         # NOTE(review): "cond" is stripped by the first gsub above, so the
         # two patterns below can never match; kept for safety
         term = gsub("condother", "other", term),
         term = gsub("condself", "self", term),
         term = gsub("topichealth", "topic (health)", term),
         term = gsub("self_referential", "self-referential", term),
         term = gsub("self_relevance", "self-relevance", term),
         term = gsub("social_relevance", "social relevance", term),
         term = gsub(":", " x ", term),
         # APA style: floor at "< .001", cap at "1.000", strip leading zero
         p = ifelse(p < .001, "< .001",
             ifelse(p > .999, "1.000", gsub("0.(.*)", ".\\1", sprintf("%.3f", p)))),
         `b [95% CI]` = sprintf("%.2f [%0.2f, %.2f]", estimate, conf.low, conf.high)) %>%
  select(term, `b [95% CI]`, df, t, p)
}

# Simple slopes / simple effects table for a moderated model.
#   model: fitted model supported by emmeans
#   var: focal predictor name (string)
#   moderator: moderator variable name (string)
#   continuous: TRUE  -> slope of `var` at each moderator level (emtrends);
#               FALSE -> revpairwise contrasts of `var` within moderator levels,
#                        keeping only contrasts against "control"
# NOTE(review): not called anywhere in this file's visible chunks.
# NOTE(review): assumes emmeans returns asymptotic CI columns
# (asymp.LCL/asymp.UCL); for lmer models emmeans returns lower.CL/upper.CL —
# confirm against the models this is used with.
simple_slopes = function(model, var, moderator, continuous = TRUE) {
  
  if (isTRUE(continuous)) {
    emmeans::emtrends(model, as.formula(paste("~", moderator)), var = var) %>%
      data.frame() %>%
      rename("trend" = 2) %>%
      mutate(`b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", trend, asymp.LCL, asymp.UCL)) %>%
      select(!!moderator, `b [95% CI]`) %>%
      kable()  %>%
      kableExtra::kable_styling()
    
  } else {
    # unadjusted pairwise contrasts of condition within each moderator level
    confint(emmeans::contrast(emmeans::emmeans(model, as.formula(paste("~", var, "|", moderator))), "revpairwise", by = moderator, adjust = "none")) %>%
      data.frame() %>%
      filter(grepl("control", contrast)) %>%
      mutate(`b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", estimate, asymp.LCL, asymp.UCL)) %>%
      select(contrast, !!moderator, `b [95% CI]`) %>%
      arrange(contrast) %>%
      kable()  %>%
      kableExtra::kable_styling()
  }
}

define aesthetics

# Named color palettes used throughout the plots
palette_condition = c(self = "#ee9b00", control = "#bb3e03", other = "#005f73")

palette_sharing = c("#0a9396", "#ee9b00")

palette_roi = c(`self-referential` = "#ee9b00", mentalizing = "#005f73")

palette_dv = c(`self-relevance` = "#ee9b00",
               `social relevance` = "#005f73",
               sharing = "#56282D")

palette_topic = c(climate = "#E6805E", health = "#3A3357")

# Shared ggplot theme: minimal base, no gridlines, black axis text/lines,
# legend on top
plot_aes = theme_minimal() +
  theme(
    text = element_text(size = 16, family = "Futura Medium"),
    legend.position = "top",
    legend.text = element_text(size = 12),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.text = element_text(color = "black"),
    axis.line = element_line(colour = "black"),
    axis.ticks.y = element_blank()
  )

load and tidy data

# Load trial-level data and standardize parameter estimates within
# participant (pID) and ROI atlas
merged_all = read.csv("../data/study1_data.csv")

merged = merged_all %>%
  filter(outlier == "no" | is.na(outlier)) %>%
  group_by(pID, atlas) %>%
  mutate(parameter_estimate_std = parameter_estimate / sd(parameter_estimate, na.rm = TRUE)) %>%
  ungroup()  # drop grouping so downstream operations aren't silently grouped

# wide data: one column per preregistered Neurosynth ROI
# (pivot_wider supersedes the deprecated spread)
merged_wide = merged %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(site, pID, trial, topic, cond, value, self_relevance, social_relevance, atlas, parameter_estimate_std) %>%
  pivot_wider(names_from = atlas, values_from = parameter_estimate_std) %>%
  rename("self_referential" = `self-referential`)

# wide data: one column per alternative (Scholz et al., 2017) ROI
merged_wide_alt = merged %>%
  filter(atlas %in% c("pnas_self", "pnas_mentalizing_nopc")) %>%
  select(site, pID, trial, topic, cond, value, self_relevance, social_relevance, atlas, parameter_estimate_std) %>%
  pivot_wider(names_from = atlas, values_from = parameter_estimate_std) %>%
  rename("self_referential" = pnas_self,
         "mentalizing" = pnas_mentalizing_nopc)



sensitivity analyses

Given the high correlation between the preregistered Neurosynth ROIs, we conducted sensitivity analyses using ROIs from Scholz et al. (2017), "A neural model of valuation and information virality."

In order to maximize the differentiation between the self-referential and mentalizing ROIs, we removed the PCC/precuneus cluster from the mentalizing ROI as it overlapped with the self-referential ROI.

ROI correlations

Compared to the preregistered Neurosynth ROIs (r = .94, 95% CI [.94, .94]), the correlation between the alternative ROIs is substantially reduced.

# Repeated-measures correlation between the two alternative ROIs
# (within-participant, via rmcorr)
merged_wide_alt %>%
  rmcorr::rmcorr(as.factor(pID), mentalizing, self_referential, data = .)
## 
## Repeated measures correlation
## 
## r
## 0.5588849
## 
## degrees of freedom
## 5928
## 
## p-value
## 0
## 
## 95% confidence interval
## 0.5411267 0.5761449

H1

Is greater activity in the ROIs associated with higher self and social relevance ratings?

self-referential ROI

✅ H1a: Greater activity in the self-referential ROI will be associated with higher self-relevance ratings

# H1a: self-relevance ratings ~ standardized self-referential ROI activity,
# with random intercepts and slopes by participant
mod_h1a =  lmer(self_relevance ~ self_referential + (1 + self_referential | pID),
               data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# H1a fixed-effects table, styled for HTML output
table_h1a = table_model(mod_h1a)

kableExtra::kable_styling(kable(table_h1a))
term b [95% CI] df t p
intercept 2.56 [2.48, 2.64] 84.62 65.06 < .001
self-referential 0.03 [0.01, 0.06] 83.66 2.40 .018

summary

summary(mod_h1a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ self_referential + (1 + self_referential | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16758.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4707 -0.7086  0.1383  0.6783  2.4399 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.116790 0.34175       
##           self_referential 0.001606 0.04008  -0.79
##  Residual                  0.918028 0.95814       
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                  Estimate Std. Error       df t value            Pr(>|t|)    
## (Intercept)       2.55698    0.03930 84.61525  65.056 <0.0000000000000002 ***
## self_referential  0.03131    0.01303 83.66472   2.403              0.0185 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## self_rfrntl -0.344

mentalizing ROI

✅ H1b: Greater activity in the mentalizing ROI will be associated with higher social relevance ratings

# H1b: social relevance ratings ~ standardized mentalizing ROI activity,
# with random intercepts and slopes by participant
mod_h1b = lmer(social_relevance ~ mentalizing + (1 + mentalizing | pID),
               data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# H1b fixed-effects table, styled for HTML output
table_h1b = table_model(mod_h1b)

kableExtra::kable_styling(kable(table_h1b))
term b [95% CI] df t p
intercept 2.67 [2.58, 2.75] 84.07 64.07 < .001
mentalizing 0.03 [0.01, 0.06] 82.98 2.86 .005

summary

summary(mod_h1b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ mentalizing + (1 + mentalizing | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15841.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8050 -0.7206  0.1710  0.6480  2.6691 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.134851 0.36722       
##           mentalizing 0.001039 0.03224  -0.09
##  Residual             0.784321 0.88562       
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)  2.66512    0.04160 84.07281  64.067 < 0.0000000000000002 ***
## mentalizing  0.03406    0.01192 82.97654   2.856              0.00541 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## mentalizing -0.100

combined plot

# Model-predicted ratings across ROI activity (-4.5 to 5 SD) for both
# hypotheses, stacked into one data frame for the combined plot
predicted = ggeffects::ggpredict(mod_h1a, c("self_referential [-4.5:5]")) %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h1b, c("mentalizing [-4.5:5]")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing",
                     variable = "social relevance"))

# Trial-level data for per-participant regression lines in the H1 plot:
# long format, keeping only the hypothesized rating-ROI pairings
# (pivot_longer supersedes the deprecated gather)
ind_data = merged_wide %>%
  select(pID, trial, contains("relevance"), mentalizing, self_referential) %>%
  rename("self-referential" = self_referential) %>%
  pivot_longer(contains("relevance"), names_to = "variable", values_to = "predicted") %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable)) %>%
  pivot_longer(c(mentalizing, `self-referential`), names_to = "roi", values_to = "x") %>%
  filter(!(variable == "self-relevance" & roi == "mentalizing") & ! (variable == "social relevance" & roi == "self-referential"))

# Combined H1 plot: model predictions (thick line + CI ribbon) over
# per-participant OLS fit lines, faceted by rating variable.
# guide = "none" replaces guide = FALSE (deprecated since ggplot2 3.3.4;
# matches usage elsewhere in this file)
(plot_h1 = predicted %>%
  ggplot(aes(x, predicted)) +
  stat_smooth(data = ind_data, aes(group = pID, color = roi), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = roi), alpha = .3, color = NA) +
  geom_line(aes(color = roi), size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_roi, guide = "none") +
  scale_fill_manual(name = "", values = palette_roi, guide = "none") +
  labs(x = "\nROI activity (SD)", y = "predicted rating\n") +
  plot_aes  +
  theme(legend.position = "top",
        legend.key.width=unit(2,"cm")))

H4

Do the manipulations increase neural activity in brain regions associated with self-referential processing and mentalizing?

self-referential ROI

✅ H4a: Self-focused intervention (compared to control) will increase brain activity in ROIs related to self-referential processes.

# H4a: self-referential ROI activity ~ condition (control = reference),
# with random intercepts and condition slopes by participant
mod_h4a = lmer(self_referential ~ cond + (1 + cond | pID),
              data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# H4a fixed-effects table, styled for HTML output
table_h4a = table_model(mod_h4a)

kableExtra::kable_styling(kable(table_h4a))
term b [95% CI] df t p
intercept 0.23 [0.12, 0.33] 84.12 4.38 < .001
other 0.11 [0.03, 0.19] 84.42 2.86 .005
self 0.13 [0.04, 0.21] 83.69 2.84 .006

summary

summary(mod_h4a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_referential ~ cond + (1 + cond | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17254.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4678 -0.6543 -0.0102  0.6474  3.5891 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.18879  0.4345              
##           condother   0.04997  0.2235   -0.05      
##           condself    0.08492  0.2914    0.05  0.49
##  Residual             0.97421  0.9870              
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value  Pr(>|t|)    
## (Intercept)  0.22817    0.05204 84.12046   4.385 0.0000334 ***
## condother    0.11309    0.03950 84.41676   2.863   0.00529 ** 
## condself     0.12606    0.04441 83.69460   2.838   0.00569 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.267       
## condself  -0.176  0.493

mentalizing ROI

❌ H4b: Other-focused intervention (compared to control) will increase brain activity in ROIs related to mentalizing processes.

# H4b: mentalizing ROI activity ~ condition (control = reference),
# with random intercepts and condition slopes by participant
mod_h4b = lmer(mentalizing ~ cond + (1 + cond | pID),
              data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# H4b fixed-effects table, styled for HTML output
table_h4b = table_model(mod_h4b)

kableExtra::kable_styling(kable(table_h4b))
term b [95% CI] df t p
intercept 0.27 [0.15, 0.39] 84.19 4.58 < .001
other 0.01 [-0.07, 0.08] 83.16 0.21 .837
self 0.05 [-0.04, 0.13] 84.03 1.11 .271

summary

summary(mod_h4b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: mentalizing ~ cond + (1 + cond | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17292.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4694 -0.6574 -0.0019  0.6645  4.0424 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.25179  0.5018              
##           condother   0.04215  0.2053   -0.45      
##           condself    0.07355  0.2712   -0.25  0.87
##  Residual             0.98487  0.9924              
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)  0.269101   0.058773 84.192890   4.579 0.000016 ***
## condother    0.007961   0.038457 83.160115   0.207    0.837    
## condself     0.047633   0.043001 84.025816   1.108    0.271    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.461       
## condself  -0.354  0.641

combined plot

# Model-predicted ROI activity by condition for both ROIs, with factor
# levels ordered for plotting
predicted_h4 = ggeffects::ggpredict(mod_h4a, c("cond")) %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h4b, c("cond")) %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Trial-level ROI activity per participant and condition for the H4 plot;
# alternative ROI names are recoded to match predicted_h4's atlas labels
ind_data_h4 = merged %>%
  mutate(atlas = recode(atlas, "pnas_self" = "self-referential",
                        "pnas_mentalizing_nopc" = "mentalizing")) %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(pID, cond, run, trial, atlas, parameter_estimate_std) %>%
  unique() %>%
  rename("x" = cond,
         "predicted" = parameter_estimate_std) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# H4 plot: predicted ROI activity by condition (point ranges = model CIs),
# grey lines = participant means, faceted by ROI.
# NOTE(review): no alpha aesthetic is mapped, so scale_alpha_manual() below
# appears to be a no-op — confirm before removing.
(plot_h4 = predicted_h4 %>%
  ggplot(aes(x = x, y = predicted)) +
  stat_summary(data = ind_data_h4, aes(group = pID), fun = "mean", geom = "line",
               size = .1, color = "grey50") +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1) +
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high), size = .75) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_alpha_manual(name = "", values = c(1, .5)) +
  labs(x = "", y = "ROI activity (SD)\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H6

Is ROI activity positively related to sharing intentions?

self-referential ROI

✅ Stronger activity in the self-referential ROI will be related to higher sharing intentions.

# H6a: sharing intention (value) ~ self-referential ROI activity,
# with random intercepts and slopes by participant
mod_h6a = lmer(value ~ self_referential + (1 + self_referential | pID),
              data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# H6a fixed-effects table, styled for HTML output
table_h6a = table_model(mod_h6a)

kableExtra::kable_styling(kable(table_h6a))
term b [95% CI] df t p
intercept 2.60 [2.52, 2.68] 85.08 67.91 < .001
self-referential 0.06 [0.03, 0.09] 83.56 4.40 < .001

summary

summary(mod_h6a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_referential + (1 + self_referential | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16641.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5502 -0.7218  0.1157  0.7344  2.1951 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.10916  0.33040       
##           self_referential 0.00401  0.06333  -0.36
##  Residual                  0.93217  0.96549       
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                  Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)       2.59963    0.03828 85.07741  67.910 < 0.0000000000000002 ***
## self_referential  0.06272    0.01427 83.55508   4.396            0.0000323 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## self_rfrntl -0.251

mentalizing ROI

✅ Stronger activation in the mentalizing ROI will be related to higher sharing intentions.

# H6b: sharing intention (value) ~ mentalizing ROI activity,
# with random intercepts and slopes by participant
mod_h6b = lmer(value ~ mentalizing + (1 + mentalizing | pID),
              data = merged_wide_alt,
              control = lmerControl(optimizer = "bobyqa"))

model table

# H6b fixed-effects table, styled for HTML output
table_h6b = table_model(mod_h6b)

kableExtra::kable_styling(kable(table_h6b))
term b [95% CI] df t p
intercept 2.61 [2.53, 2.68] 84.71 68.68 < .001
mentalizing 0.04 [0.02, 0.07] 83.49 3.41 .001

summary

summary(mod_h6b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ mentalizing + (1 + mentalizing | pID)
##    Data: merged_wide_alt
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16658.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.6298 -0.7135  0.1138  0.7344  2.0984 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr
##  pID      (Intercept) 0.107678 0.32814      
##           mentalizing 0.001175 0.03428  0.08
##  Residual             0.936640 0.96780      
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)  2.60723    0.03796 84.70738  68.683 < 0.0000000000000002 ***
## mentalizing  0.04450    0.01306 83.48501   3.407              0.00101 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## mentalizing -0.070

combined plot

# Grid of ROI activity values for model predictions
vals = seq(-4.5,4.5,.1)

# Model-predicted sharing intentions across ROI activity for both ROIs
# (ggpredict resolves `vals` from the calling environment)
predicted_h6 = ggeffects::ggpredict(mod_h6a, c("self_referential [vals]")) %>%
  data.frame() %>%
  mutate(roi = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h6b, c("mentalizing [vals]")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing")) %>%
  mutate(roi = factor(roi, levels = c("self-referential", "mentalizing")))

# Trial-level data for per-participant lines in the H6 plot.
# NOTE(review): merged is not filtered to the two plotted atlases here, and
# atlas values outside the two factor levels become NA in `roi` — confirm
# these rows are dropped downstream as intended.
ind_data_h6 = merged %>%
  select(pID, cond, run, trial, atlas, parameter_estimate_std, value) %>%
  rename("x" = parameter_estimate_std,
         "predicted" = value,
         "roi" = atlas) %>%
  mutate(roi = factor(roi, levels = c("self-referential", "mentalizing")))

# H6 plot: predicted sharing intention by ROI activity (thick line + CI
# ribbon) over per-participant OLS fit lines, faceted by ROI
predicted_h6 %>%
  ggplot(aes(x = x, y = predicted, color = roi, fill = roi)) +
  stat_smooth(data = ind_data_h6, aes(group = pID), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~roi) +
  scale_color_manual(name = "", values = palette_roi) +
  scale_fill_manual(name = "", values = palette_roi) +
  labs(y = "predicted sharing intention\n", x = "\nROI activity (SD)") +
  plot_aes +
  theme(legend.position = "none")

H7

Is there an indirect effect of the condition on sharing intentions through activity in self-referential and mentalizing ROIs?

prep data

# source functions (provides indirect.mlm and indirect.mlm.summary)
source("indirectMLM.R")

# create self condition dataframe: self vs. control trials only,
# cond dummy-coded 1 = self, 0 = control
data_med_self = merged %>%
  filter(!cond == "other" & atlas == "self-referential") %>%
  mutate(cond = ifelse(cond == "self", 1, 0)) %>%
  select(pID, site, trial, cond, value, parameter_estimate) %>%
  data.frame()

# create social condition dataframe: other vs. control trials only,
# cond dummy-coded 1 = other, 0 = control
data_med_other = merged %>%
  filter(!cond == "self" & atlas == "mentalizing") %>%
  mutate(cond = ifelse(cond == "other", 1, 0)) %>%
  select(pID, site, trial, cond, value, parameter_estimate) %>%
  data.frame()

# define variables
y_var = "value"                # DV: sharing intention
m_var = "parameter_estimate"   # mediator: unstandardized ROI parameter estimate

self condition

✅ H7a: The effect of self-focused intervention on sharing intention will be mediated by increased activity in the self-referential ROI.

model_name = "mediation_self"
data = data_med_self

# Fit (or load from cache) the bootstrapped multilevel mediation:
# cond -> self-referential ROI activity -> sharing intention.
# Cleanups: path computed once; get(model_name) replaces both the redundant
# get("model_name") and the eval(parse(...)) anti-pattern; TRUE/FALSE spelled out.
model_path = sprintf("models/model_%s_alternative.RDS", model_name)
if (file.exists(model_path)) {
  assign(model_name, readRDS(model_path))
} else {
  assign(model_name, boot(data = data, statistic = indirect.mlm, R = 500,
                          y = y_var, x = "cond", mediator = m_var, group.id = "pID",
                          between.m = FALSE, uncentered.x = FALSE))
  saveRDS(get(model_name), model_path)
}

indirect.mlm.summary(get(model_name))
## #### Population Covariance ####
## Covariance of Random Slopes a and b: 0.001 [-0.002, 0.011]
## 
## 
## #### Indirect Effects ####
## # Within-subject Effects
## Unbiased Estimate of Within-subjects Indirect Effect: 0.006 [0.001, 0.017]
## Biased Estimate of Within-subjects Indirect Effect: 0.005 [-0.001, 0.011]
## Bias in Within-subjects Indirect Effect: 0.001 [0, 0.011]
## 
## 
## #### Total Effect ####
## Unbiased Estimate of Total Effect: -0.046 [-0.106, 0.014]
## Biased Total Effect of X on Y (c path): -0.044 [-0.103, 0.018]
## Bias in Total Effect: 0.002 [0, 0.007]
## 
## 
## #### Direct Effects ####
## Direct Effect of Predictor on Dependent Variable (c' path): -0.052 [-0.113, 0.004]
## Within-subjects Effect of Predictor on Mediator (a path for group-mean centered predictor): 0.029 [-0.005, 0.056]
## Within-subjects Effect of Mediator on Dependent Variable (b path for group-mean centered mediator): 0.169 [0.112, 0.255]

other condition

❌ H7b: The effect of other-focused intervention on sharing intention will be mediated by increased activity in the mentalizing ROI.

model_name = "mediation_other"
data = data_med_other

# Fit (or load from cache) the bootstrapped multilevel mediation:
# cond -> mentalizing ROI activity -> sharing intention.
# Cleanups: path computed once; get(model_name) replaces both the redundant
# get("model_name") and the eval(parse(...)) anti-pattern; TRUE/FALSE spelled out.
model_path = sprintf("models/model_%s_alternative.RDS", model_name)
if (file.exists(model_path)) {
  assign(model_name, readRDS(model_path))
} else {
  assign(model_name, boot(data = data, statistic = indirect.mlm, R = 500,
                          y = y_var, x = "cond", mediator = m_var, group.id = "pID",
                          between.m = FALSE, uncentered.x = FALSE))
  saveRDS(get(model_name), model_path)
}

indirect.mlm.summary(get(model_name))
## #### Population Covariance ####
## Covariance of Random Slopes a and b: 0 [-0.004, 0.006]
## 
## 
## #### Indirect Effects ####
## # Within-subject Effects
## Unbiased Estimate of Within-subjects Indirect Effect: 0.003 [-0.002, 0.012]
## Biased Estimate of Within-subjects Indirect Effect: 0.003 [-0.001, 0.01]
## Bias in Within-subjects Indirect Effect: 0 [0, 0.006]
## 
## 
## #### Total Effect ####
## Unbiased Estimate of Total Effect: -0.032 [-0.097, 0.027]
## Biased Total Effect of X on Y (c path): -0.032 [-0.097, 0.027]
## Bias in Total Effect: 0.001 [0, 0.004]
## 
## 
## #### Direct Effects ####
## Direct Effect of Predictor on Dependent Variable (c' path): -0.035 [-0.102, 0.023]
## Within-subjects Effect of Predictor on Mediator (a path for group-mean centered predictor): 0.016 [-0.005, 0.039]
## Within-subjects Effect of Mediator on Dependent Variable (b path for group-mean centered mediator): 0.191 [0.147, 0.306]



combined table

# Combined fixed-effects table across all sensitivity-analysis hypotheses
bind_rows(
  table_h1a %>% mutate(DV = "H1a: Self-relevance"),
  table_h1b %>% mutate(DV = "H1b: Social relevance"),
  table_h4a %>% mutate(DV = "H4a: Self-referential ROI"),
  table_h4b %>% mutate(DV = "H4b: Mentalizing ROI"),
  table_h6a %>% mutate(DV = "H6a: Sharing intention"),
  table_h6b %>% mutate(DV = "H6b: Sharing intention")
) %>%
  select(DV, everything()) %>%
  kable() %>%
  kable_styling()
DV term b [95% CI] df t p
H1a: Self-relevance intercept 2.56 [2.48, 2.64] 84.62 65.06 < .001
H1a: Self-relevance self-referential 0.03 [0.01, 0.06] 83.66 2.40 .018
H1b: Social relevance intercept 2.67 [2.58, 2.75] 84.07 64.07 < .001
H1b: Social relevance mentalizing 0.03 [0.01, 0.06] 82.98 2.86 .005
H4a: Self-referential ROI intercept 0.23 [0.12, 0.33] 84.12 4.38 < .001
H4a: Self-referential ROI other 0.11 [0.03, 0.19] 84.42 2.86 .005
H4a: Self-referential ROI self 0.13 [0.04, 0.21] 83.69 2.84 .006
H4b: Mentalizing ROI intercept 0.27 [0.15, 0.39] 84.19 4.58 < .001
H4b: Mentalizing ROI other 0.01 [-0.07, 0.08] 83.16 0.21 .837
H4b: Mentalizing ROI self 0.05 [-0.04, 0.13] 84.03 1.11 .271
H6a: Sharing intention intercept 2.60 [2.52, 2.68] 85.08 67.91 < .001
H6a: Sharing intention self-referential 0.06 [0.03, 0.09] 83.56 4.40 < .001
H6b: Sharing intention intercept 2.61 [2.53, 2.68] 84.71 68.68 < .001
H6b: Sharing intention mentalizing 0.04 [0.02, 0.07] 83.49 3.41 .001

moderation by article topic

These analyses explore whether the analyses reported in study 1 of the main manuscript are moderated by article topic (health or climate).

H1

Are the relationships between ROI activity and self and social relevance ratings moderated by article topic?

self-referential ROI

There is a main effect of topic, such that health articles elicited greater activity in the self-referential ROI compared to climate articles.

These data are not consistent with moderation by topic.

# H1a moderation: self-relevance ~ ROI activity x article topic
# NOTE(review): this overwrites mod_h1a from the sensitivity analyses above
mod_h1a =  lmer(self_relevance ~ self_referential * topic + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# H1a-by-topic fixed-effects table, styled for HTML output
table_h1a = table_model(mod_h1a)

kableExtra::kable_styling(kable(table_h1a))
term b [95% CI] df t p
intercept 2.49 [2.41, 2.57] 102.45 61.81 < .001
self-referential 0.03 [0.00, 0.07] 6009.96 2.02 .044
topic (health) 0.14 [0.09, 0.19] 5927.41 5.50 < .001
self-referential x topic (health) 0.02 [-0.02, 0.06] 5942.54 0.92 .355

summary

summary(mod_h1a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ self_referential * topic + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16734
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5460 -0.6971  0.1407  0.6772  2.3734 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1122   0.3350  
##  Residual             0.9134   0.9557  
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                                Estimate Std. Error         df t value
## (Intercept)                     2.49252    0.04033  102.45246  61.806
## self_referential                0.03363    0.01667 6009.96414   2.018
## topichealth                     0.13678    0.02489 5927.40995   5.496
## self_referential:topichealth    0.02068    0.02238 5942.53630   0.924
##                                          Pr(>|t|)    
## (Intercept)                  < 0.0000000000000002 ***
## self_referential                           0.0437 *  
## topichealth                          0.0000000404 ***
## self_referential:topichealth               0.3554    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rf tpchlt
## self_rfrntl -0.035              
## topichealth -0.304  0.045       
## slf_rfrntl:  0.024 -0.677 -0.126

mentalizing ROI

There is a main effect of topic, such that health articles elicited greater activity in the mentalizing ROI compared to climate articles.

These data are not consistent with moderation by topic.

# H1b moderation: social relevance ~ mentalizing activity x article topic
# NOTE(review): this overwrites mod_h1b from the sensitivity analyses above
mod_h1b = lmer(social_relevance ~ mentalizing * topic + (1 + mentalizing | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# H1b-by-topic fixed-effects table, styled for HTML output
table_h1b = table_model(mod_h1b)

kableExtra::kable_styling(kable(table_h1b))
term b [95% CI] df t p
intercept 2.51 [2.43, 2.60] 97.93 58.12 < .001
mentalizing 0.03 [0.00, 0.07] 226.60 2.16 .032
topic (health) 0.29 [0.24, 0.34] 5924.99 12.18 < .001
mentalizing x topic (health) 0.01 [-0.03, 0.05] 5903.41 0.66 .512

summary

summary(mod_h1b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ mentalizing * topic + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15673.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7740 -0.7047  0.1262  0.6762  2.7725 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.134996 0.36742       
##           mentalizing 0.001968 0.04436  -0.17
##  Residual             0.760437 0.87203       
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                           Estimate Std. Error         df t value
## (Intercept)                2.51344    0.04325   97.92856  58.119
## mentalizing                0.03426    0.01585  226.60228   2.162
## topichealth                0.29013    0.02382 5924.99396  12.178
## mentalizing:topichealth    0.01347    0.02055 5903.41026   0.655
##                                    Pr(>|t|)    
## (Intercept)             <0.0000000000000002 ***
## mentalizing                          0.0317 *  
## topichealth             <0.0000000000000002 ***
## mentalizing:topichealth              0.5123    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) mntlzn tpchlt
## mentalizing -0.156              
## topichealth -0.267  0.173       
## mntlzng:tpc  0.076 -0.636 -0.323

combined plot

# Model-predicted ratings across ROI activity, separately by article topic
# (second term in ggpredict becomes the `group` column)
predicted = ggeffects::ggpredict(mod_h1a, c("self_referential [-4.5:5]", "topic")) %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h1b, c("mentalizing [-4.5:5]", "topic")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing",
                     variable = "social relevance"))

# Trial-level data for per-participant, per-topic regression lines in the
# topic-moderation plot (topic renamed to `group` to match ggpredict output)
# (pivot_longer supersedes the deprecated gather)
ind_data = merged_wide %>%
  select(topic, pID, trial, contains("relevance"), mentalizing, self_referential) %>%
  rename("self-referential" = self_referential,
         "group" = topic) %>%
  pivot_longer(contains("relevance"), names_to = "variable", values_to = "predicted") %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable)) %>%
  pivot_longer(c(mentalizing, `self-referential`), names_to = "roi", values_to = "x") %>%
  filter(!(variable == "self-relevance" & roi == "mentalizing") & ! (variable == "social relevance" & roi == "self-referential"))

# Topic-moderation plot: predicted ratings by ROI activity and topic, with
# per-participant fit lines colored by topic, faceted by rating variable
(plot_h1 = predicted %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  stat_smooth(data = ind_data, aes(group = interaction(pID, group)), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .3, color = NA) +
  geom_line(size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_topic) +
  scale_fill_manual(name = "", values = palette_topic) +
  labs(x = "\nROI activity (SD)", y = "predicted rating\n") +
  plot_aes  +
  theme(legend.position = "top",
        legend.key.width=unit(2,"cm")))

H2

Are the effects of the experimental manipulations on relevance moderated by article topic?

self-relevance

There is a main effect of topic such that health articles are rated as more self-relevant than climate articles.

There was also an interaction such that the effect of the self-focused condition on self-relevance was weaker for health articles.

# H2a: self-relevance ratings predicted by condition (control/other/self) x
# article topic (climate/health), with a random intercept per participant;
# bobyqa optimizer used to aid convergence
mod_h2a = lmer(self_relevance ~ cond * topic + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for the H2a model, rendered as a styled HTML table
table_h2a = table_model(mod_h2a)

kableExtra::kable_styling(kable(table_h2a))
term b [95% CI] df t p
intercept 2.44 [2.35, 2.54] 192.81 51.81 < .001
other 0.04 [-0.04, 0.13] 5924.29 0.99 .322
self 0.12 [0.03, 0.20] 5924.33 2.76 .006
topic (health) 0.23 [0.14, 0.31] 5924.32 5.29 < .001
other x topic (health) -0.07 [-0.19, 0.05] 5924.39 -1.21 .225
self x topic (health) -0.17 [-0.29, -0.05] 5924.39 -2.82 .005

summary

# Full REML summary for mod_h2a: random-effect variances, fixed effects,
# and correlations of fixed effects
summary(mod_h2a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ cond * topic + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16744.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5020 -0.6957  0.1390  0.6725  2.4230 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1115   0.3339  
##  Residual             0.9144   0.9563  
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                         Estimate Std. Error         df t value
## (Intercept)              2.44206    0.04713  192.80567  51.813
## condother                0.04231    0.04272 5924.28614   0.991
## condself                 0.11794    0.04271 5924.32872   2.762
## topichealth              0.22583    0.04273 5924.32377   5.285
## condother:topichealth   -0.07333    0.06041 5924.38737  -1.214
## condself:topichealth    -0.17055    0.06043 5924.39008  -2.822
##                                   Pr(>|t|)    
## (Intercept)           < 0.0000000000000002 ***
## condother                          0.32194    
## condself                           0.00577 ** 
## topichealth                     0.00000013 ***
## condother:topichealth              0.22487    
## condself:topichealth               0.00478 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.452                            
## condself    -0.452  0.499                     
## topichealth -0.452  0.499  0.499              
## cndthr:tpch  0.320 -0.707 -0.353 -0.707       
## cndslf:tpch  0.319 -0.353 -0.707 -0.707  0.500

social relevance

There is a main effect of topic such that health articles are rated as more socially relevant than climate articles.

These data are not consistent with moderation by topic.

# H2b: social relevance ratings predicted by condition x article topic,
# with a random intercept per participant (same specification as mod_h2a)
mod_h2b = lmer(social_relevance ~ cond * topic + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for the H2b model, rendered as a styled HTML table
table_h2b = table_model(mod_h2b)

kableExtra::kable_styling(kable(table_h2b))
term b [95% CI] df t p
intercept 2.49 [2.40, 2.59] 158.48 51.69 < .001
other 0.03 [-0.05, 0.11] 5924.24 0.75 .451
self 0.07 [-0.00, 0.15] 5924.27 1.87 .062
topic (health) 0.31 [0.23, 0.38] 5924.26 7.89 < .001
other x topic (health) 0.03 [-0.08, 0.14] 5924.31 0.55 .584
self x topic (health) -0.06 [-0.16, 0.05] 5924.31 -1.02 .307

summary

# Full REML summary for mod_h2b
summary(mod_h2b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ cond * topic + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15688.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7955 -0.7046  0.1166  0.6685  2.7653 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1328   0.3644  
##  Residual             0.7637   0.8739  
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                         Estimate Std. Error         df t value
## (Intercept)              2.49087    0.04819  158.48308  51.688
## condother                0.02943    0.03904 5924.23777   0.754
## condself                 0.07291    0.03903 5924.26906   1.868
## topichealth              0.30819    0.03905 5924.26486   7.893
## condother:topichealth    0.03025    0.05521 5924.31128   0.548
## condself:topichealth    -0.05639    0.05522 5924.31350  -1.021
##                                   Pr(>|t|)    
## (Intercept)           < 0.0000000000000002 ***
## condother                           0.4509    
## condself                            0.0618 .  
## topichealth            0.00000000000000349 ***
## condother:topichealth               0.5838    
## condself:topichealth                0.3072    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.404                            
## condself    -0.404  0.499                     
## topichealth -0.404  0.499  0.499              
## cndthr:tpch  0.286 -0.707 -0.353 -0.707       
## cndslf:tpch  0.286 -0.353 -0.707 -0.707  0.500

combined plot

# H2 predictions: marginal means by condition x topic from both relevance
# models, stacked and labeled; condition releveled so "self" plots first
predicted_h2 = bind_rows(
  ggeffects::ggpredict(mod_h2a, c("cond", "topic")) %>%
    data.frame() %>%
    mutate(model = "self-relevance"),
  ggeffects::ggpredict(mod_h2b, c("cond", "topic")) %>%
    data.frame() %>%
    mutate(model = "social relevance")
  ) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# Trial-level ratings in long form for the H2 spaghetti plot: one row per
# trial x rating type, with columns named to match predicted_h2.
# pivot_longer() replaces the superseded gather() (tidyr >= 1.0); row order
# differs but downstream plotting is unaffected.
ind_data_h2 = merged_wide %>%
  rename("x" = cond,
         "group" = topic) %>%
  pivot_longer(c(self_relevance, social_relevance),
               names_to = "model", values_to = "predicted") %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         model = gsub("self_relevance", "self-relevance", model),
         model = gsub("social_relevance", "social relevance", model))
  
# H2 combined plot: per-participant condition means (thin lines) overlaid
# with model-predicted means and 95% CIs (points + ranges), by topic and
# rating type; outer parentheses print the plot while assigning it
(plot_h2 = predicted_h2 %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  # thin lines: each participant's mean rating per condition
  stat_summary(data = ind_data_h2, aes(group = interaction(pID, group)), fun = "mean", geom = "line", size = .1) +
  # thick lines and pointranges: model predictions with CIs
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  facet_grid(~model) +
  scale_color_manual(name = "", values = palette_topic) +
  labs(x = "", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H3

Are the relationships between self and social relevance and sharing intentions moderated by article topic?

The relationship between self-relevance and sharing intentions was not moderated by topic.

However, the relationship between social relevance and sharing intentions was slightly stronger for health articles compared to climate articles.

# H3: sharing intentions (value) predicted by self- and social relevance,
# each interacting with topic; random intercept plus random slopes for both
# relevance ratings per participant
mod_h3 = lmer(value ~ self_relevance * topic + social_relevance * topic + (1 + self_relevance + social_relevance | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

plot

# H3 predictions: sharing intentions as a function of each relevance rating
# by topic, from the same model, stacked and labeled by predictor
predicted = bind_rows(
  ggeffects::ggpredict(mod_h3, c("self_relevance", "topic")) %>%
    data.frame() %>%
    mutate(variable = "self-relevance"),
  ggeffects::ggpredict(mod_h3, c("social_relevance", "topic")) %>%
    data.frame() %>%
    mutate(variable = "social relevance")
)

# Trial-level data in long form for the H3 plot: one row per trial x relevance
# rating (x), with sharing intention as the outcome (predicted).
# pivot_longer() replaces the superseded gather() (tidyr >= 1.0); row order
# differs but downstream plotting is unaffected.
points = merged_wide %>%
  rename("self-referential" = self_referential,
         "predicted" = value,
         "group" = topic) %>%
  pivot_longer(contains("relevance"),
               names_to = "variable", values_to = "x") %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable))

# H3 plot: model-predicted sharing intentions (thick lines + CI ribbons)
# over per-participant OLS fits (thin lines), faceted by relevance type
(plot_rel_sharing = predicted %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  # thin per-participant linear fits from the trial-level data
  stat_smooth(data = points, aes(group = interaction(pID, group)), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_topic) +
  scale_fill_manual(name = "", values = palette_topic) +
  labs(x = "\nrating", y = "predicted sharing intention\n") +
  plot_aes)

model table

# Tidy fixed-effects table for the H3 model, rendered as a styled HTML table
table_h3 = table_model(mod_h3)

kableExtra::kable_styling(kable(table_h3))
term b [95% CI] df t p
intercept 1.26 [1.12, 1.40] 116.24 17.61 < .001
self-relevance 0.29 [0.24, 0.34] 275.19 10.76 < .001
topic (health) -0.11 [-0.25, 0.03] 5856.15 -1.53 .127
social relevance 0.21 [0.14, 0.27] 180.07 6.53 < .001
self-relevance x topic (health) 0.03 [-0.03, 0.09] 5555.76 1.04 .297
topic (health) x social relevance 0.06 [0.00, 0.13] 5628.34 1.99 .047

summary

# Full REML summary for mod_h3
summary(mod_h3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_relevance * topic + social_relevance * topic + (1 +  
##     self_relevance + social_relevance | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 14865.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2186 -0.6964  0.0530  0.6918  3.0662 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr       
##  pID      (Intercept)      0.22828  0.4778              
##           self_relevance   0.01296  0.1139   -0.28      
##           social_relevance 0.03176  0.1782   -0.58 -0.52
##  Residual                  0.68009  0.8247              
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                                Estimate Std. Error         df t value
## (Intercept)                     1.25629    0.07133  116.24162  17.613
## self_relevance                  0.29118    0.02706  275.18942  10.761
## topichealth                    -0.10724    0.07030 5856.14632  -1.525
## social_relevance                0.20657    0.03163  180.06539   6.530
## self_relevance:topichealth      0.03169    0.03036 5555.75910   1.044
## topichealth:social_relevance    0.06361    0.03202 5628.34298   1.986
##                                          Pr(>|t|)    
## (Intercept)                  < 0.0000000000000002 ***
## self_relevance               < 0.0000000000000002 ***
## topichealth                                 0.127    
## social_relevance                   0.000000000648 ***
## self_relevance:topichealth                  0.297    
## topichealth:social_relevance                0.047 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rl tpchlt scl_rl slf_r:
## self_relvnc -0.272                            
## topichealth -0.438  0.182                     
## socil_rlvnc -0.481 -0.655  0.192              
## slf_rlvnc:t  0.154 -0.679 -0.293  0.435       
## tpchlth:sc_  0.195  0.477 -0.467 -0.575 -0.675

H4

Are the effects of the experimental manipulations on ROI activity moderated by article topic?

self-referential ROI

There is a main effect of topic, such that health articles elicited greater activity in the self-referential ROI compared to climate articles.

These data are not consistent with moderation by topic.

# H4a: self-referential ROI activity predicted by condition x topic,
# with random intercept and random condition slopes per participant
mod_h4a = lmer(self_referential ~ cond * topic + (1 + cond | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for the H4a model, rendered as a styled HTML table
table_h4a = table_model(mod_h4a)

kableExtra::kable_styling(kable(table_h4a))
term b [95% CI] df t p
intercept 0.01 [-0.11, 0.13] 111.15 0.23 .817
other 0.09 [-0.01, 0.19] 223.56 1.76 .080
self 0.13 [0.02, 0.23] 194.40 2.38 .018
topic (health) 0.14 [0.05, 0.23] 5759.54 3.14 .002
other x topic (health) -0.01 [-0.13, 0.12] 5759.09 -0.10 .923
self x topic (health) -0.08 [-0.20, 0.05] 5759.09 -1.23 .219

summary

# Full REML summary for mod_h4a
summary(mod_h4a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_referential ~ cond * topic + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17277.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8287 -0.6532  0.0009  0.6469  3.6402 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.23296  0.4827              
##           condother   0.04633  0.2152   -0.18      
##           condself    0.07389  0.2718   -0.07  0.59
##  Residual             0.97663  0.9882              
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                          Estimate  Std. Error          df t value Pr(>|t|)   
## (Intercept)              0.014164    0.060933  111.151603   0.232  0.81662   
## condother                0.087816    0.049945  223.557623   1.758  0.08007 . 
## condself                 0.126442    0.053085  194.397498   2.382  0.01819 * 
## topichealth              0.138468    0.044161 5759.544061   3.136  0.00172 **
## condother:topichealth   -0.006027    0.062437 5759.089655  -0.097  0.92311   
## condself:topichealth    -0.076784    0.062454 5759.092400  -1.229  0.21895   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.393                            
## condself    -0.335  0.520                     
## topichealth -0.361  0.441  0.415              
## cndthr:tpch  0.255 -0.625 -0.293 -0.707       
## cndslf:tpch  0.255 -0.312 -0.588 -0.707  0.500

mentalizing ROI

There is a main effect of topic, such that health articles elicited greater activity in the mentalizing ROI compared to climate articles.

These data are not consistent with moderation by topic.

# H4b: mentalizing ROI activity predicted by condition x topic,
# with random intercept and random condition slopes per participant
mod_h4b = lmer(mentalizing ~ cond * topic + (1 + cond | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for the H4b model, rendered as a styled HTML table
table_h4b = table_model(mod_h4b)

kableExtra::kable_styling(kable(table_h4b))
term b [95% CI] df t p
intercept 0.27 [0.15, 0.39] 112.87 4.55 < .001
other 0.06 [-0.04, 0.15] 233.74 1.16 .249
self 0.10 [-0.00, 0.20] 198.59 1.91 .057
topic (health) 0.11 [0.03, 0.20] 5759.59 2.58 .010
other x topic (health) 0.01 [-0.12, 0.13] 5759.16 0.09 .929
self x topic (health) -0.06 [-0.18, 0.07] 5759.10 -0.89 .371

summary

# Full REML summary for mod_h4b
summary(mod_h4b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: mentalizing ~ cond * topic + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17285.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6809 -0.6608  0.0154  0.6675  3.2685 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.21874  0.4677              
##           condother   0.03899  0.1975   -0.19      
##           condself    0.06951  0.2637   -0.05  0.61
##  Residual             0.98011  0.9900              
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                          Estimate  Std. Error          df t value  Pr(>|t|)    
## (Intercept)              0.271239    0.059574  112.871337   4.553 0.0000134 ***
## condother                0.056826    0.049143  233.737498   1.156   0.24873    
## condself                 0.100709    0.052663  198.594010   1.912   0.05727 .  
## topichealth              0.114265    0.044239 5759.592778   2.583   0.00982 ** 
## condother:topichealth    0.005579    0.062548 5759.155171   0.089   0.92893    
## condself:topichealth    -0.055923    0.062564 5759.104448  -0.894   0.37144    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.403                            
## condself    -0.334  0.521                     
## topichealth -0.370  0.449  0.419              
## cndthr:tpch  0.262 -0.636 -0.296 -0.707       
## cndslf:tpch  0.262 -0.317 -0.593 -0.707  0.500

combined plot

# H4 predictions: marginal ROI activity by condition x topic for both ROI
# models, stacked and labeled; condition and ROI releveled for plotting order
predicted_h4 = bind_rows(
  ggeffects::ggpredict(mod_h4a, c("cond", "topic")) %>%
    data.frame() %>%
    mutate(atlas = "self-referential"),
  ggeffects::ggpredict(mod_h4b, c("cond", "topic")) %>%
    data.frame() %>%
    mutate(atlas = "mentalizing")
  ) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Trial-level ROI activity for the H4 spaghetti plot, from the long-format
# merged data: one row per trial x ROI, with columns renamed to match
# predicted_h4
ind_data_h4 = merged %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(topic, pID, cond, run, trial, atlas, parameter_estimate_std) %>%
  # distinct() is the idiomatic (and faster) dplyr equivalent of unique()
  # on a data frame; both keep the first occurrence of each duplicated row
  distinct() %>%
  rename("x" = cond,
         "predicted" = parameter_estimate_std,
         "group" = topic) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# H4 combined plot: per-participant condition means (thin lines) overlaid
# with model-predicted means and 95% CIs, faceted by ROI
(plot_h4 = predicted_h4 %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  # thin lines: each participant's mean ROI activity per condition
  stat_summary(data = ind_data_h4, aes(group = interaction(pID, group)), fun = "mean", geom = "line", size = .1) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_topic) +
  labs(x = "", y = "ROI activity (SD)\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H5

Are the effects of the experimental manipulations on sharing intentions moderated by article topic?

There is a main effect of topic, such that health articles have higher sharing intentions than climate articles.

These data are not consistent with moderation by topic.

# H5: sharing intentions (value) predicted by condition x topic,
# with a random intercept per participant
mod_h5 = lmer(value ~ cond * topic + (1 | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

plot

# H5 predictions: marginal sharing intentions by condition x topic;
# condition releveled so "self" plots first
predicted_h5 = ggeffects::ggpredict(mod_h5, c("cond", "topic")) %>%
  as.data.frame() %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# Trial-level sharing intentions with columns renamed to match predicted_h5
ind_data_h5 = merged_wide %>%
  rename("x" = cond, "predicted" = value, "group" = topic) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))
  
# H5 plot: per-participant condition means (thin lines) overlaid with
# model-predicted means and 95% CIs, by topic
predicted_h5 %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  # thin lines: each participant's mean sharing intention per condition
  stat_summary(data = ind_data_h5, aes(group = interaction(pID, group)), fun = "mean", geom = "line", size = .1) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  scale_color_manual(name = "", values = palette_topic) +
  labs(x = "", y = "predicted sharing intention\n") +
  plot_aes +
  theme(legend.position = c(.85, .15))

model table

# Tidy fixed-effects table for the H5 model, rendered as a styled HTML table
table_h5 = table_model(mod_h5)

kableExtra::kable_styling(kable(table_h5))
term b [95% CI] df t p
intercept 2.53 [2.44, 2.62] 201.75 54.08 < .001
other -0.06 [-0.14, 0.03] 5845.61 -1.35 .176
self -0.04 [-0.13, 0.04] 5845.60 -0.96 .338
topic (health) 0.24 [0.15, 0.32] 5845.59 5.45 < .001
other x topic (health) 0.05 [-0.07, 0.17] 5845.70 0.82 .412
self x topic (health) -0.01 [-0.13, 0.11] 5845.70 -0.13 .899

summary

# Full REML summary for mod_h5
summary(mod_h5)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ cond * topic + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16585.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5100 -0.7348  0.1009  0.7437  2.1731 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1064   0.3262  
##  Residual             0.9244   0.9614  
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                          Estimate  Std. Error          df t value
## (Intercept)              2.528093    0.046747  201.753181  54.080
## condother               -0.058492    0.043253 5845.610839  -1.352
## condself                -0.041428    0.043275 5845.596303  -0.957
## topichealth              0.235570    0.043231 5845.589904   5.449
## condother:topichealth    0.050166    0.061132 5845.697566   0.821
## condself:topichealth    -0.007801    0.061163 5845.695074  -0.128
##                                   Pr(>|t|)    
## (Intercept)           < 0.0000000000000002 ***
## condother                            0.176    
## condself                             0.338    
## topichealth                   0.0000000527 ***
## condother:topichealth                0.412    
## condself:topichealth                 0.899    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf tpchlt cndth:
## condother   -0.461                            
## condself    -0.461  0.499                     
## topichealth -0.462  0.499  0.499              
## cndthr:tpch  0.326 -0.708 -0.353 -0.707       
## cndslf:tpch  0.326 -0.353 -0.708 -0.707  0.500

H6

Are the positive relationships between ROI activity and sharing intentions moderated by article topic?

self-referential ROI

There is a main effect of topic, such that health articles have higher sharing intentions than climate articles.

These data are not consistent with moderation by topic.

# H6a: sharing intentions predicted by self-referential ROI activity x topic,
# with random intercept and random ROI-activity slope per participant
mod_h6a = lmer(value ~ self_referential * topic + (1 + self_referential | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for the H6a model, rendered as a styled HTML table
table_h6a = table_model(mod_h6a)

kableExtra::kable_styling(kable(table_h6a))
term b [95% CI] df t p
intercept 2.49 [2.41, 2.57] 103.54 62.42 < .001
self-referential 0.07 [0.04, 0.11] 241.01 4.18 < .001
topic (health) 0.24 [0.19, 0.29] 5845.60 9.56 < .001
self-referential x topic (health) 0.01 [-0.04, 0.05] 5837.06 0.31 .759

summary

# Full REML summary for mod_h6a
summary(mod_h6a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_referential * topic + (1 + self_referential | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16543.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5112 -0.7286  0.1045  0.7516  2.1642 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.10801  0.32865       
##           self_referential 0.00187  0.04324  -0.32
##  Residual                  0.91661  0.95740       
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                                 Estimate  Std. Error          df t value
## (Intercept)                     2.487373    0.039850  103.543865  62.419
## self_referential                0.073252    0.017534  241.007394   4.178
## topichealth                     0.240266    0.025132 5845.604407   9.560
## self_referential:topichealth    0.006944    0.022631 5837.058866   0.307
##                                          Pr(>|t|)    
## (Intercept)                  < 0.0000000000000002 ***
## self_referential                        0.0000412 ***
## topichealth                  < 0.0000000000000002 ***
## self_referential:topichealth                0.759    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rf tpchlt
## self_rfrntl -0.114              
## topichealth -0.312  0.047       
## slf_rfrntl:  0.027 -0.655 -0.129

mentalizing ROI

There is a main effect of topic, such that health articles have higher sharing intentions than climate articles.

These data are not consistent with moderation by topic.

# H6b: sharing intentions predicted by mentalizing ROI activity x topic,
# with random intercept and random ROI-activity slope per participant
mod_h6b = lmer(value ~ mentalizing * topic + (1 + mentalizing | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy fixed-effects table for the H6b model, rendered as a styled HTML table
table_h6b = table_model(mod_h6b)

kableExtra::kable_styling(kable(table_h6b))
term b [95% CI] df t p
intercept 2.47 [2.39, 2.55] 105.80 61.39 < .001
mentalizing 0.07 [0.04, 0.11] 240.64 4.31 < .001
topic (health) 0.25 [0.20, 0.30] 5840.93 9.37 < .001
mentalizing x topic (health) -0.01 [-0.06, 0.03] 5824.61 -0.51 .607

summary

# Full REML summary for mod_h6b
summary(mod_h6b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ mentalizing * topic + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16552.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4759 -0.7331  0.1117  0.7614  2.1251 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.108333 0.3291        
##           mentalizing 0.001211 0.0348   -0.18
##  Residual             0.918533 0.9584        
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                           Estimate Std. Error         df t value
## (Intercept)                2.47032    0.04024  105.79604  61.389
## mentalizing                0.07392    0.01715  240.63866   4.310
## topichealth                0.24726    0.02638 5840.93083   9.374
## mentalizing:topichealth   -0.01169    0.02274 5824.61188  -0.514
##                                     Pr(>|t|)    
## (Intercept)             < 0.0000000000000002 ***
## mentalizing                        0.0000238 ***
## topichealth             < 0.0000000000000002 ***
## mentalizing:topichealth                0.607    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) mntlzn tpchlt
## mentalizing -0.170              
## topichealth -0.319  0.182       
## mntlzng:tpc  0.093 -0.654 -0.327

combined plot

# Grid of ROI activity values (SD units) at which to evaluate the models;
# referenced by name inside the ggpredict() terms strings below
vals = seq(-4.5, 4.5, .1)

# H6 predictions: sharing intentions over the ROI-activity grid by topic,
# for both ROI models, stacked and labeled
predicted_h6 = bind_rows(
  ggeffects::ggpredict(mod_h6a, c("self_referential [vals]", "topic")) %>%
    data.frame() %>%
    mutate(atlas = "self-referential"),
  ggeffects::ggpredict(mod_h6b, c("mentalizing [vals]", "topic")) %>%
    data.frame() %>%
    mutate(atlas = "mentalizing")
  ) %>%
  mutate(atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Trial-level ROI activity and sharing intentions for the H6 plot, from the
# long-format merged data, with columns renamed to match predicted_h6
ind_data_h6 = merged %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(topic, pID, cond, run, trial, atlas, parameter_estimate_std, value) %>%
  rename("x" = parameter_estimate_std, "predicted" = value, "group" = topic) %>%
  mutate(atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# H6 plot: model-predicted sharing intentions (thick lines + CI ribbons)
# over per-participant OLS fits (thin lines), faceted by ROI
predicted_h6 %>%
  ggplot(aes(x = x, y = predicted, color = group, fill = group)) +
  # thin per-participant linear fits from the trial-level data
  stat_smooth(data = ind_data_h6, aes(group = interaction(pID, group)), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_topic) +
  scale_fill_manual(name = "", values = palette_topic) +
  labs(y = "predicted sharing intention\n", x = "\nROI activity (SD)") +
  plot_aes +
  theme(legend.position = "top")

combined table

# Combined fixed-effects table across all hypothesis tests (H1-H6):
# tag each per-model table with its DV label, stack, and render
list(
  table_h1a %>% mutate(DV = "H1a: Self-relevance"),
  table_h1b %>% mutate(DV = "H1b: Social relevance"),
  table_h2a %>% mutate(DV = "H2a: Self-relevance"),
  table_h2b %>% mutate(DV = "H2b: Social relevance"),
  table_h3 %>% mutate(DV = "H3a-b: Sharing intention"),
  table_h4a %>% mutate(DV = "H4a: Self-referential ROI"),
  table_h4b %>% mutate(DV = "H4b: Mentalizing ROI"),
  table_h5 %>% mutate(DV = "H5: Sharing intention"),
  table_h6a %>% mutate(DV = "H6a: Sharing intention"),
  table_h6b %>% mutate(DV = "H6b: Sharing intention")
  ) %>%
  bind_rows() %>%
  select(DV, everything()) %>%
  kable() %>%
  kable_styling()
DV term b [95% CI] df t p
H1a: Self-relevance intercept 2.49 [2.41, 2.57] 102.45 61.81 < .001
H1a: Self-relevance self-referential 0.03 [0.00, 0.07] 6009.96 2.02 .044
H1a: Self-relevance topic (health) 0.14 [0.09, 0.19] 5927.41 5.50 < .001
H1a: Self-relevance self-referential x topic (health) 0.02 [-0.02, 0.06] 5942.54 0.92 .355
H1b: Social relevance intercept 2.51 [2.43, 2.60] 97.93 58.12 < .001
H1b: Social relevance mentalizing 0.03 [0.00, 0.07] 226.60 2.16 .032
H1b: Social relevance topic (health) 0.29 [0.24, 0.34] 5924.99 12.18 < .001
H1b: Social relevance mentalizing x topic (health) 0.01 [-0.03, 0.05] 5903.41 0.66 .512
H2a: Self-relevance intercept 2.44 [2.35, 2.54] 192.81 51.81 < .001
H2a: Self-relevance other 0.04 [-0.04, 0.13] 5924.29 0.99 .322
H2a: Self-relevance self 0.12 [0.03, 0.20] 5924.33 2.76 .006
H2a: Self-relevance topic (health) 0.23 [0.14, 0.31] 5924.32 5.29 < .001
H2a: Self-relevance other x topic (health) -0.07 [-0.19, 0.05] 5924.39 -1.21 .225
H2a: Self-relevance self x topic (health) -0.17 [-0.29, -0.05] 5924.39 -2.82 .005
H2b: Social relevance intercept 2.49 [2.40, 2.59] 158.48 51.69 < .001
H2b: Social relevance other 0.03 [-0.05, 0.11] 5924.24 0.75 .451
H2b: Social relevance self 0.07 [-0.00, 0.15] 5924.27 1.87 .062
H2b: Social relevance topic (health) 0.31 [0.23, 0.38] 5924.26 7.89 < .001
H2b: Social relevance other x topic (health) 0.03 [-0.08, 0.14] 5924.31 0.55 .584
H2b: Social relevance self x topic (health) -0.06 [-0.16, 0.05] 5924.31 -1.02 .307
H3a-b: Sharing intention intercept 1.26 [1.12, 1.40] 116.24 17.61 < .001
H3a-b: Sharing intention self-relevance 0.29 [0.24, 0.34] 275.19 10.76 < .001
H3a-b: Sharing intention topic (health) -0.11 [-0.25, 0.03] 5856.15 -1.53 .127
H3a-b: Sharing intention social relevance 0.21 [0.14, 0.27] 180.07 6.53 < .001
H3a-b: Sharing intention self-relevance x topic (health) 0.03 [-0.03, 0.09] 5555.76 1.04 .297
H3a-b: Sharing intention topic (health) x social relevance 0.06 [0.00, 0.13] 5628.34 1.99 .047
H4a: Self-referential ROI intercept 0.01 [-0.11, 0.13] 111.15 0.23 .817
H4a: Self-referential ROI other 0.09 [-0.01, 0.19] 223.56 1.76 .080
H4a: Self-referential ROI self 0.13 [0.02, 0.23] 194.40 2.38 .018
H4a: Self-referential ROI topic (health) 0.14 [0.05, 0.23] 5759.54 3.14 .002
H4a: Self-referential ROI other x topic (health) -0.01 [-0.13, 0.12] 5759.09 -0.10 .923
H4a: Self-referential ROI self x topic (health) -0.08 [-0.20, 0.05] 5759.09 -1.23 .219
H4b: Mentalizing ROI intercept 0.27 [0.15, 0.39] 112.87 4.55 < .001
H4b: Mentalizing ROI other 0.06 [-0.04, 0.15] 233.74 1.16 .249
H4b: Mentalizing ROI self 0.10 [-0.00, 0.20] 198.59 1.91 .057
H4b: Mentalizing ROI topic (health) 0.11 [0.03, 0.20] 5759.59 2.58 .010
H4b: Mentalizing ROI other x topic (health) 0.01 [-0.12, 0.13] 5759.16 0.09 .929
H4b: Mentalizing ROI self x topic (health) -0.06 [-0.18, 0.07] 5759.10 -0.89 .371
H5: Sharing intention intercept 2.53 [2.44, 2.62] 201.75 54.08 < .001
H5: Sharing intention other -0.06 [-0.14, 0.03] 5845.61 -1.35 .176
H5: Sharing intention self -0.04 [-0.13, 0.04] 5845.60 -0.96 .338
H5: Sharing intention topic (health) 0.24 [0.15, 0.32] 5845.59 5.45 < .001
H5: Sharing intention other x topic (health) 0.05 [-0.07, 0.17] 5845.70 0.82 .412
H5: Sharing intention self x topic (health) -0.01 [-0.13, 0.11] 5845.70 -0.13 .899
H6a: Sharing intention intercept 2.49 [2.41, 2.57] 103.54 62.42 < .001
H6a: Sharing intention self-referential 0.07 [0.04, 0.11] 241.01 4.18 < .001
H6a: Sharing intention topic (health) 0.24 [0.19, 0.29] 5845.60 9.56 < .001
H6a: Sharing intention self-referential x topic (health) 0.01 [-0.04, 0.05] 5837.06 0.31 .759
H6b: Sharing intention intercept 2.47 [2.39, 2.55] 105.80 61.39 < .001
H6b: Sharing intention mentalizing 0.07 [0.04, 0.11] 240.64 4.31 < .001
H6b: Sharing intention topic (health) 0.25 [0.20, 0.30] 5840.93 9.37 < .001
H6b: Sharing intention mentalizing x topic (health) -0.01 [-0.06, 0.03] 5824.61 -0.51 .607

cite packages

# Generate formatted citations for every package used in this session,
# so readers can credit the software underlying the reported analyses.
report::cite_packages()
##   - Angelo Canty and Brian Ripley (2021). boot: Bootstrap R (S-Plus) Functions. R package version 1.3-28.
##   - Douglas Bates, Martin Maechler and Mikael Jagan (2023). Matrix: Sparse and Dense Matrix Classes and Methods. R package version 1.5-4. https://CRAN.R-project.org/package=Matrix
##   - Douglas Bates, Martin Maechler, Ben Bolker, Steve Walker (2015). Fitting Linear Mixed-Effects Models Using lme4. Journal of Statistical Software, 67(1), 1-48. doi:10.18637/jss.v067.i01.
##   - H. Wickham. ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York, 2016.
##   - Hadley Wickham (2019). stringr: Simple, Consistent Wrappers for Common String Operations. R package version 1.4.0. https://CRAN.R-project.org/package=stringr
##   - Hadley Wickham (2021). forcats: Tools for Working with Categorical Variables (Factors). R package version 0.5.1. https://CRAN.R-project.org/package=forcats
##   - Hadley Wickham and Maximilian Girlich (2022). tidyr: Tidy Messy Data. R package version 1.2.0. https://CRAN.R-project.org/package=tidyr
##   - Hadley Wickham, Jennifer Bryan and Malcolm Barrett (2021). usethis: Automate Package and Project Setup. R package version 2.1.5. https://CRAN.R-project.org/package=usethis
##   - Hadley Wickham, Jim Hester and Jennifer Bryan (2022). readr: Read Rectangular Text Data. R package version 2.1.2. https://CRAN.R-project.org/package=readr
##   - Hadley Wickham, Jim Hester, Winston Chang and Jennifer Bryan (2021). devtools: Tools to Make Developing R Packages Easier. R package version 2.4.3. https://CRAN.R-project.org/package=devtools
##   - Hadley Wickham, Romain François, Lionel Henry and Kirill Müller (2022). dplyr: A Grammar of Data Manipulation. R package version 1.0.9. https://CRAN.R-project.org/package=dplyr
##   - Hao Zhu (2021). kableExtra: Construct Complex Table with 'kable' and Pipe Syntax. R package version 1.3.4. https://CRAN.R-project.org/package=kableExtra
##   - Jim Hester, Hadley Wickham and Gábor Csárdi (2021). fs: Cross-Platform File System Operations Based on 'libuv'. R package version 1.5.2. https://CRAN.R-project.org/package=fs
##   - Kirill Müller and Hadley Wickham (2022). tibble: Simple Data Frames. R package version 3.1.8. https://CRAN.R-project.org/package=tibble
##   - Kuznetsova A, Brockhoff PB, Christensen RHB (2017). "lmerTest Package: Tests in Linear Mixed Effects Models." _Journal of Statistical Software_, *82*(13), 1-26. doi: 10.18637/jss.v082.i13 (URL: https://doi.org/10.18637/jss.v082.i13).
##   - Lionel Henry and Hadley Wickham (2020). purrr: Functional Programming Tools. R package version 0.3.4. https://CRAN.R-project.org/package=purrr
##   - Lüdecke D (2018). "ggeffects: Tidy Data Frames of Marginal Effects from Regression Models." _Journal of Open Source Software_, *3*(26), 772. doi: 10.21105/joss.00772 (URL: https://doi.org/10.21105/joss.00772).
##   - R Core Team (2021). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.
##   - Rinker, T. W. & Kurkiewicz, D. (2017). pacman: Package Management for R. version 0.5.0. Buffalo, New York. http://github.com/trinker/pacman
##   - Wickham et al., (2019). Welcome to the tidyverse. Journal of Open Source Software, 4(43), 1686, https://doi.org/10.21105/joss.01686
##   - Yihui Xie (2021). knitr: A General-Purpose Package for Dynamic Report Generation in R. R package version 1.37.